ipath_verbs_mcast.c
  1. /*
  2. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/list.h>
  33. #include <linux/rcupdate.h>
  34. #include "ipath_verbs.h"
  35. /*
  36. * Global table of GID to attached QPs.
  37. * The table is global to all ipath devices since a send from one QP/device
  38. * needs to be locally routed to any locally attached QPs on the same
  39. * or different device.
  40. */
  41. static struct rb_root mcast_tree;
  42. static DEFINE_SPINLOCK(mcast_lock);
  43. /**
  44. * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
  45. * @qp: the QP to link
  46. */
  47. static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
  48. {
  49. struct ipath_mcast_qp *mqp;
  50. mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
  51. if (!mqp)
  52. goto bail;
  53. mqp->qp = qp;
  54. atomic_inc(&qp->refcount);
  55. bail:
  56. return mqp;
  57. }
  58. static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
  59. {
  60. struct ipath_qp *qp = mqp->qp;
  61. /* Notify ipath_destroy_qp() if it is waiting. */
  62. if (atomic_dec_and_test(&qp->refcount))
  63. wake_up(&qp->wait);
  64. kfree(mqp);
  65. }
  66. /**
  67. * ipath_mcast_alloc - allocate the multicast GID structure
  68. * @mgid: the multicast GID
  69. *
  70. * A list of QPs will be attached to this structure.
  71. */
  72. static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
  73. {
  74. struct ipath_mcast *mcast;
  75. mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
  76. if (!mcast)
  77. goto bail;
  78. mcast->mgid = *mgid;
  79. INIT_LIST_HEAD(&mcast->qp_list);
  80. init_waitqueue_head(&mcast->wait);
  81. atomic_set(&mcast->refcount, 0);
  82. bail:
  83. return mcast;
  84. }
  85. static void ipath_mcast_free(struct ipath_mcast *mcast)
  86. {
  87. struct ipath_mcast_qp *p, *tmp;
  88. list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
  89. ipath_mcast_qp_free(p);
  90. kfree(mcast);
  91. }
  92. /**
  93. * ipath_mcast_find - search the global table for the given multicast GID
  94. * @mgid: the multicast GID to search for
  95. *
  96. * Returns NULL if not found.
  97. *
  98. * The caller is responsible for decrementing the reference count if found.
  99. */
  100. struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
  101. {
  102. struct rb_node *n;
  103. unsigned long flags;
  104. struct ipath_mcast *mcast;
  105. spin_lock_irqsave(&mcast_lock, flags);
  106. n = mcast_tree.rb_node;
  107. while (n) {
  108. int ret;
  109. mcast = rb_entry(n, struct ipath_mcast, rb_node);
  110. ret = memcmp(mgid->raw, mcast->mgid.raw,
  111. sizeof(union ib_gid));
  112. if (ret < 0)
  113. n = n->rb_left;
  114. else if (ret > 0)
  115. n = n->rb_right;
  116. else {
  117. atomic_inc(&mcast->refcount);
  118. spin_unlock_irqrestore(&mcast_lock, flags);
  119. goto bail;
  120. }
  121. }
  122. spin_unlock_irqrestore(&mcast_lock, flags);
  123. mcast = NULL;
  124. bail:
  125. return mcast;
  126. }
/**
 * ipath_mcast_add - insert mcast GID into table and attach QP struct
 * @mcast: the mcast GID table
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.
 *
 * Note: the error codes are deliberately positive; the caller
 * (ipath_multicast_attach) maps them to the negative values returned to
 * the verbs layer and uses them to decide which structures to free.
 */
static int ipath_mcast_add(struct ipath_mcast *mcast,
			   struct ipath_mcast_qp *mqp)
{
	struct rb_node **n = &mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mcast_lock, flags);

	/* Walk the rb-tree looking for an existing entry for this GID. */
	while (*n) {
		struct ipath_mcast *tmcast;
		struct ipath_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct ipath_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* GID already present.  Search the QP list to see if this
		 * QP is already attached to it. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				spin_unlock_irqrestore(&mcast_lock, flags);
				ret = ESRCH;
				goto bail;
			}
		}
		/* Attach the QP to the existing GID entry. */
		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		spin_unlock_irqrestore(&mcast_lock, flags);
		ret = EEXIST;
		goto bail;
	}

	/* GID not found: attach the QP and insert @mcast into the tree. */
	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	/* The tree itself holds one reference on the entry. */
	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &mcast_tree);

	spin_unlock_irqrestore(&mcast_lock, flags);

	ret = 0;

bail:
	return ret;
}
  181. int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
  182. {
  183. struct ipath_qp *qp = to_iqp(ibqp);
  184. struct ipath_mcast *mcast;
  185. struct ipath_mcast_qp *mqp;
  186. int ret;
  187. /*
  188. * Allocate data structures since its better to do this outside of
  189. * spin locks and it will most likely be needed.
  190. */
  191. mcast = ipath_mcast_alloc(gid);
  192. if (mcast == NULL) {
  193. ret = -ENOMEM;
  194. goto bail;
  195. }
  196. mqp = ipath_mcast_qp_alloc(qp);
  197. if (mqp == NULL) {
  198. ipath_mcast_free(mcast);
  199. ret = -ENOMEM;
  200. goto bail;
  201. }
  202. switch (ipath_mcast_add(mcast, mqp)) {
  203. case ESRCH:
  204. /* Neither was used: can't attach the same QP twice. */
  205. ipath_mcast_qp_free(mqp);
  206. ipath_mcast_free(mcast);
  207. ret = -EINVAL;
  208. goto bail;
  209. case EEXIST: /* The mcast wasn't used */
  210. ipath_mcast_free(mcast);
  211. break;
  212. default:
  213. break;
  214. }
  215. ret = 0;
  216. bail:
  217. return ret;
  218. }
  219. int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
  220. {
  221. struct ipath_qp *qp = to_iqp(ibqp);
  222. struct ipath_mcast *mcast = NULL;
  223. struct ipath_mcast_qp *p, *tmp;
  224. struct rb_node *n;
  225. unsigned long flags;
  226. int last = 0;
  227. int ret;
  228. spin_lock_irqsave(&mcast_lock, flags);
  229. /* Find the GID in the mcast table. */
  230. n = mcast_tree.rb_node;
  231. while (1) {
  232. if (n == NULL) {
  233. spin_unlock_irqrestore(&mcast_lock, flags);
  234. ret = 0;
  235. goto bail;
  236. }
  237. mcast = rb_entry(n, struct ipath_mcast, rb_node);
  238. ret = memcmp(gid->raw, mcast->mgid.raw,
  239. sizeof(union ib_gid));
  240. if (ret < 0)
  241. n = n->rb_left;
  242. else if (ret > 0)
  243. n = n->rb_right;
  244. else
  245. break;
  246. }
  247. /* Search the QP list. */
  248. list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
  249. if (p->qp != qp)
  250. continue;
  251. /*
  252. * We found it, so remove it, but don't poison the forward
  253. * link until we are sure there are no list walkers.
  254. */
  255. list_del_rcu(&p->list);
  256. /* If this was the last attached QP, remove the GID too. */
  257. if (list_empty(&mcast->qp_list)) {
  258. rb_erase(&mcast->rb_node, &mcast_tree);
  259. last = 1;
  260. }
  261. break;
  262. }
  263. spin_unlock_irqrestore(&mcast_lock, flags);
  264. if (p) {
  265. /*
  266. * Wait for any list walkers to finish before freeing the
  267. * list element.
  268. */
  269. wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
  270. ipath_mcast_qp_free(p);
  271. }
  272. if (last) {
  273. atomic_dec(&mcast->refcount);
  274. wait_event(mcast->wait, !atomic_read(&mcast->refcount));
  275. ipath_mcast_free(mcast);
  276. }
  277. ret = 0;
  278. bail:
  279. return ret;
  280. }
  281. int ipath_mcast_tree_empty(void)
  282. {
  283. return mcast_tree.rb_node == NULL;
  284. }