/* ipath_verbs_mcast.c */
  1. /*
  2. * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  3. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #include <linux/list.h>
  34. #include <linux/rcupdate.h>
  35. #include "ipath_verbs.h"
  36. /*
  37. * Global table of GID to attached QPs.
  38. * The table is global to all ipath devices since a send from one QP/device
  39. * needs to be locally routed to any locally attached QPs on the same
  40. * or different device.
  41. */
  42. static struct rb_root mcast_tree;
  43. static DEFINE_SPINLOCK(mcast_lock);
  44. /**
  45. * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
  46. * @qp: the QP to link
  47. */
  48. static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
  49. {
  50. struct ipath_mcast_qp *mqp;
  51. mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
  52. if (!mqp)
  53. goto bail;
  54. mqp->qp = qp;
  55. atomic_inc(&qp->refcount);
  56. bail:
  57. return mqp;
  58. }
  59. static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
  60. {
  61. struct ipath_qp *qp = mqp->qp;
  62. /* Notify ipath_destroy_qp() if it is waiting. */
  63. if (atomic_dec_and_test(&qp->refcount))
  64. wake_up(&qp->wait);
  65. kfree(mqp);
  66. }
  67. /**
  68. * ipath_mcast_alloc - allocate the multicast GID structure
  69. * @mgid: the multicast GID
  70. *
  71. * A list of QPs will be attached to this structure.
  72. */
  73. static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
  74. {
  75. struct ipath_mcast *mcast;
  76. mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
  77. if (!mcast)
  78. goto bail;
  79. mcast->mgid = *mgid;
  80. INIT_LIST_HEAD(&mcast->qp_list);
  81. init_waitqueue_head(&mcast->wait);
  82. atomic_set(&mcast->refcount, 0);
  83. bail:
  84. return mcast;
  85. }
  86. static void ipath_mcast_free(struct ipath_mcast *mcast)
  87. {
  88. struct ipath_mcast_qp *p, *tmp;
  89. list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
  90. ipath_mcast_qp_free(p);
  91. kfree(mcast);
  92. }
  93. /**
  94. * ipath_mcast_find - search the global table for the given multicast GID
  95. * @mgid: the multicast GID to search for
  96. *
  97. * Returns NULL if not found.
  98. *
  99. * The caller is responsible for decrementing the reference count if found.
  100. */
  101. struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
  102. {
  103. struct rb_node *n;
  104. unsigned long flags;
  105. struct ipath_mcast *mcast;
  106. spin_lock_irqsave(&mcast_lock, flags);
  107. n = mcast_tree.rb_node;
  108. while (n) {
  109. int ret;
  110. mcast = rb_entry(n, struct ipath_mcast, rb_node);
  111. ret = memcmp(mgid->raw, mcast->mgid.raw,
  112. sizeof(union ib_gid));
  113. if (ret < 0)
  114. n = n->rb_left;
  115. else if (ret > 0)
  116. n = n->rb_right;
  117. else {
  118. atomic_inc(&mcast->refcount);
  119. spin_unlock_irqrestore(&mcast_lock, flags);
  120. goto bail;
  121. }
  122. }
  123. spin_unlock_irqrestore(&mcast_lock, flags);
  124. mcast = NULL;
  125. bail:
  126. return mcast;
  127. }
  128. /**
  129. * ipath_mcast_add - insert mcast GID into table and attach QP struct
  130. * @mcast: the mcast GID table
  131. * @mqp: the QP to attach
  132. *
  133. * Return zero if both were added. Return EEXIST if the GID was already in
  134. * the table but the QP was added. Return ESRCH if the QP was already
  135. * attached and neither structure was added.
  136. */
/**
 * ipath_mcast_add - insert mcast GID into table and attach QP struct
 * @mcast: the mcast GID table entry to insert if the GID is not present
 * @mqp: the QP link node to attach
 *
 * Return zero if both were added. Return EEXIST if the GID was already in
 * the table but the QP was added. Return ESRCH if the QP was already
 * attached and neither structure was added.
 *
 * NOTE: the error codes are returned as POSITIVE values so the caller can
 * distinguish "which allocation was consumed" from a normal negative errno.
 */
static int ipath_mcast_add(struct ipath_mcast *mcast,
			   struct ipath_mcast_qp *mqp)
{
	struct rb_node **n = &mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mcast_lock, flags);

	/* Walk the rb-tree looking for an existing entry with this GID. */
	while (*n) {
		struct ipath_mcast *tmcast;
		struct ipath_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct ipath_mcast, rb_node);

		/* Ordering key is the raw 16-byte GID. */
		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* GID already present: search the QP list to see if this
		 * QP is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				spin_unlock_irqrestore(&mcast_lock, flags);
				/* Neither @mcast nor @mqp was consumed. */
				ret = ESRCH;
				goto bail;
			}
		}
		/* Attach the QP to the existing entry; @mcast is unused. */
		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		spin_unlock_irqrestore(&mcast_lock, flags);
		ret = EEXIST;
		goto bail;
	}

	/* GID not found: attach the QP and link @mcast into the tree at
	 * the leaf position the search ended on. */
	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	/* The tree itself holds a reference on the entry. */
	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &mcast_tree);

	spin_unlock_irqrestore(&mcast_lock, flags);

	ret = 0;

bail:
	return ret;
}
  182. int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
  183. {
  184. struct ipath_qp *qp = to_iqp(ibqp);
  185. struct ipath_mcast *mcast;
  186. struct ipath_mcast_qp *mqp;
  187. int ret;
  188. /*
  189. * Allocate data structures since its better to do this outside of
  190. * spin locks and it will most likely be needed.
  191. */
  192. mcast = ipath_mcast_alloc(gid);
  193. if (mcast == NULL) {
  194. ret = -ENOMEM;
  195. goto bail;
  196. }
  197. mqp = ipath_mcast_qp_alloc(qp);
  198. if (mqp == NULL) {
  199. ipath_mcast_free(mcast);
  200. ret = -ENOMEM;
  201. goto bail;
  202. }
  203. switch (ipath_mcast_add(mcast, mqp)) {
  204. case ESRCH:
  205. /* Neither was used: can't attach the same QP twice. */
  206. ipath_mcast_qp_free(mqp);
  207. ipath_mcast_free(mcast);
  208. ret = -EINVAL;
  209. goto bail;
  210. case EEXIST: /* The mcast wasn't used */
  211. ipath_mcast_free(mcast);
  212. break;
  213. default:
  214. break;
  215. }
  216. ret = 0;
  217. bail:
  218. return ret;
  219. }
  220. int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
  221. {
  222. struct ipath_qp *qp = to_iqp(ibqp);
  223. struct ipath_mcast *mcast = NULL;
  224. struct ipath_mcast_qp *p, *tmp;
  225. struct rb_node *n;
  226. unsigned long flags;
  227. int last = 0;
  228. int ret;
  229. spin_lock_irqsave(&mcast_lock, flags);
  230. /* Find the GID in the mcast table. */
  231. n = mcast_tree.rb_node;
  232. while (1) {
  233. if (n == NULL) {
  234. spin_unlock_irqrestore(&mcast_lock, flags);
  235. ret = 0;
  236. goto bail;
  237. }
  238. mcast = rb_entry(n, struct ipath_mcast, rb_node);
  239. ret = memcmp(gid->raw, mcast->mgid.raw,
  240. sizeof(union ib_gid));
  241. if (ret < 0)
  242. n = n->rb_left;
  243. else if (ret > 0)
  244. n = n->rb_right;
  245. else
  246. break;
  247. }
  248. /* Search the QP list. */
  249. list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
  250. if (p->qp != qp)
  251. continue;
  252. /*
  253. * We found it, so remove it, but don't poison the forward
  254. * link until we are sure there are no list walkers.
  255. */
  256. list_del_rcu(&p->list);
  257. /* If this was the last attached QP, remove the GID too. */
  258. if (list_empty(&mcast->qp_list)) {
  259. rb_erase(&mcast->rb_node, &mcast_tree);
  260. last = 1;
  261. }
  262. break;
  263. }
  264. spin_unlock_irqrestore(&mcast_lock, flags);
  265. if (p) {
  266. /*
  267. * Wait for any list walkers to finish before freeing the
  268. * list element.
  269. */
  270. wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
  271. ipath_mcast_qp_free(p);
  272. }
  273. if (last) {
  274. atomic_dec(&mcast->refcount);
  275. wait_event(mcast->wait, !atomic_read(&mcast->refcount));
  276. ipath_mcast_free(mcast);
  277. }
  278. ret = 0;
  279. bail:
  280. return ret;
  281. }
  282. int ipath_mcast_tree_empty(void)
  283. {
  284. return mcast_tree.rb_node == NULL;
  285. }