ipath_verbs_mcast.c

/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/rculist.h>

#include "ipath_verbs.h"

/*
 * Global table of GID to attached QPs.
 * The table is global to all ipath devices since a send from one QP/device
 * needs to be locally routed to any locally attached QPs on the same
 * or different device.
 */
static struct rb_root mcast_tree;
static DEFINE_SPINLOCK(mcast_lock);

/**
 * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
{
        struct ipath_mcast_qp *mqp;

        mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
        if (!mqp)
                goto bail;

        mqp->qp = qp;
        atomic_inc(&qp->refcount);

bail:
        return mqp;
}

static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
{
        struct ipath_qp *qp = mqp->qp;

        /* Notify ipath_destroy_qp() if it is waiting. */
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);

        kfree(mqp);
}

/**
 * ipath_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
{
        struct ipath_mcast *mcast;

        mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
        if (!mcast)
                goto bail;

        mcast->mgid = *mgid;
        INIT_LIST_HEAD(&mcast->qp_list);
        init_waitqueue_head(&mcast->wait);
        atomic_set(&mcast->refcount, 0);
        mcast->n_attached = 0;

bail:
        return mcast;
}

static void ipath_mcast_free(struct ipath_mcast *mcast)
{
        struct ipath_mcast_qp *p, *tmp;

        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
                ipath_mcast_qp_free(p);

        kfree(mcast);
}

/**
 * ipath_mcast_find - search the global table for the given multicast GID
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
{
        struct rb_node *n;
        unsigned long flags;
        struct ipath_mcast *mcast;

        spin_lock_irqsave(&mcast_lock, flags);
        n = mcast_tree.rb_node;
        while (n) {
                int ret;

                mcast = rb_entry(n, struct ipath_mcast, rb_node);

                ret = memcmp(mgid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else {
                        atomic_inc(&mcast->refcount);
                        spin_unlock_irqrestore(&mcast_lock, flags);
                        goto bail;
                }
        }
        spin_unlock_irqrestore(&mcast_lock, flags);

        mcast = NULL;

bail:
        return mcast;
}
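
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * typical caller, such as the driver's receive path, looks up the group,
 * walks qp_list, and then drops the reference taken by ipath_mcast_find().
 * The function name example_deliver_mcast() is made up for illustration.
 */
#if 0
static void example_deliver_mcast(union ib_gid *dgid)
{
        struct ipath_mcast *mcast;
        struct ipath_mcast_qp *p;

        mcast = ipath_mcast_find(dgid);
        if (mcast == NULL)
                return;         /* no QPs attached to this GID */

        list_for_each_entry_rcu(p, &mcast->qp_list, list) {
                /* ... hand the packet to p->qp ... */
        }

        /* Notify ipath_multicast_detach() if it is waiting on us. */
        if (atomic_dec_return(&mcast->refcount) <= 1)
                wake_up(&mcast->wait);
}
#endif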

/**
 * ipath_mcast_add - insert mcast GID into table and attach QP struct
 * @dev: the device the QP belongs to
 * @mcast: the mcast group to insert (used if the GID is not yet in the table)
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.  Return ENOMEM if either the
 * per-group QP limit or the per-device group limit would be exceeded.
 */
static int ipath_mcast_add(struct ipath_ibdev *dev,
                           struct ipath_mcast *mcast,
                           struct ipath_mcast_qp *mqp)
{
        struct rb_node **n = &mcast_tree.rb_node;
        struct rb_node *pn = NULL;
        int ret;

        spin_lock_irq(&mcast_lock);

        while (*n) {
                struct ipath_mcast *tmcast;
                struct ipath_mcast_qp *p;

                pn = *n;
                tmcast = rb_entry(pn, struct ipath_mcast, rb_node);

                ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0) {
                        n = &pn->rb_left;
                        continue;
                }
                if (ret > 0) {
                        n = &pn->rb_right;
                        continue;
                }

                /* Search the QP list to see if this is already there. */
                list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
                        if (p->qp == mqp->qp) {
                                ret = ESRCH;
                                goto bail;
                        }
                }
                if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
                        ret = ENOMEM;
                        goto bail;
                }

                tmcast->n_attached++;

                list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
                ret = EEXIST;
                goto bail;
        }

        spin_lock(&dev->n_mcast_grps_lock);
        if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
                spin_unlock(&dev->n_mcast_grps_lock);
                ret = ENOMEM;
                goto bail;
        }

        dev->n_mcast_grps_allocated++;
        spin_unlock(&dev->n_mcast_grps_lock);

        mcast->n_attached++;

        list_add_tail_rcu(&mqp->list, &mcast->qp_list);

        atomic_inc(&mcast->refcount);
        rb_link_node(&mcast->rb_node, pn, n);
        rb_insert_color(&mcast->rb_node, &mcast_tree);

        ret = 0;

bail:
        spin_unlock_irq(&mcast_lock);

        return ret;
}
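
/*
 * Note: ipath_mcast_add() intentionally returns positive errno values
 * (ESRCH, EEXIST, ENOMEM) as internal status codes; ipath_multicast_attach()
 * below maps them onto the negative errnos the verbs layer expects.
 */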

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_mcast *mcast;
        struct ipath_mcast_qp *mqp;
        int ret;

        /*
         * Allocate data structures since it's better to do this outside of
         * spin locks and it will most likely be needed.
         */
        mcast = ipath_mcast_alloc(gid);
        if (mcast == NULL) {
                ret = -ENOMEM;
                goto bail;
        }
        mqp = ipath_mcast_qp_alloc(qp);
        if (mqp == NULL) {
                ipath_mcast_free(mcast);
                ret = -ENOMEM;
                goto bail;
        }
        switch (ipath_mcast_add(dev, mcast, mqp)) {
        case ESRCH:
                /* Neither was used: can't attach the same QP twice. */
                ipath_mcast_qp_free(mqp);
                ipath_mcast_free(mcast);
                ret = -EINVAL;
                goto bail;

        case EEXIST:            /* The mcast wasn't used */
                ipath_mcast_free(mcast);
                break;

        case ENOMEM:
                /* Exceeded the maximum number of mcast groups. */
                ipath_mcast_qp_free(mqp);
                ipath_mcast_free(mcast);
                ret = -ENOMEM;
                goto bail;

        default:
                break;
        }

        ret = 0;

bail:
        return ret;
}
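
/*
 * Illustrative note: these two functions are presumably registered as the
 * ib_device's ->attach_mcast/->detach_mcast methods (in ipath_verbs.c), so
 * consumers reach them through the IB core, e.g.:
 */
#if 0
        ret = ib_attach_mcast(ibqp, &mgid, mlid);  /* -> ipath_multicast_attach() */
        /* ... send/receive on the multicast group ... */
        ret = ib_detach_mcast(ibqp, &mgid, mlid);  /* -> ipath_multicast_detach() */
#endif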

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_mcast *mcast = NULL;
        struct ipath_mcast_qp *p, *tmp;
        struct ipath_mcast_qp *delp = NULL;
        struct rb_node *n;
        int last = 0;
        int ret;

        spin_lock_irq(&mcast_lock);

        /* Find the GID in the mcast table. */
        n = mcast_tree.rb_node;
        while (1) {
                if (n == NULL) {
                        spin_unlock_irq(&mcast_lock);
                        ret = -EINVAL;
                        goto bail;
                }

                mcast = rb_entry(n, struct ipath_mcast, rb_node);
                ret = memcmp(gid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        break;
        }

        /* Search the QP list. */
        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
                if (p->qp != qp)
                        continue;
                /*
                 * We found it, so remove it, but don't poison the forward
                 * link until we are sure there are no list walkers.
                 */
                list_del_rcu(&p->list);
                mcast->n_attached--;
                delp = p;

                /* If this was the last attached QP, remove the GID too. */
                if (list_empty(&mcast->qp_list)) {
                        rb_erase(&mcast->rb_node, &mcast_tree);
                        last = 1;
                }
                break;
        }
        spin_unlock_irq(&mcast_lock);

        /*
         * The QP was never attached to this group; the loop cursor is not
         * a valid entry here, so bail before touching it.
         */
        if (!delp) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Wait for any list walkers to finish before freeing the
         * list element.
         */
        wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
        ipath_mcast_qp_free(delp);

        if (last) {
                atomic_dec(&mcast->refcount);
                wait_event(mcast->wait, !atomic_read(&mcast->refcount));
                ipath_mcast_free(mcast);
                spin_lock_irq(&dev->n_mcast_grps_lock);
                dev->n_mcast_grps_allocated--;
                spin_unlock_irq(&dev->n_mcast_grps_lock);
        }

        ret = 0;

bail:
        return ret;
}
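
/*
 * Note on synchronization: writers serialize on mcast_lock and update
 * qp_list with the RCU list primitives, while readers elsewhere in the
 * driver walk the list with list_for_each_entry_rcu() after taking a
 * reference via ipath_mcast_find().  Detach therefore waits for the
 * refcount to drain before freeing the list element or the group itself.
 */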

int ipath_mcast_tree_empty(void)
{
        return mcast_tree.rb_node == NULL;
}