/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */
#include <linux/errno.h>
#include <linux/err.h>

#include <ib_verbs.h>

/* Protection domains */

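/*
 * ib_alloc_pd - Allocate an unused protection domain on @device.
 *
 * A PD associates the address handles, QPs, memory regions, and memory
 * windows created against it; each of those objects takes a reference
 * (pd->usecnt), which starts at zero here.  A typical caller checks the
 * result with IS_ERR()/PTR_ERR(), roughly:
 *
 *	pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */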
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = device->alloc_pd(device);

        if (!IS_ERR(pd)) {
                pd->device = device;
                atomic_set(&pd->usecnt, 0);
        }

        return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

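/*
 * ib_dealloc_pd - Deallocate a protection domain.  Fails with -EBUSY
 * while any object created against the PD still holds a reference.
 */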
int ib_dealloc_pd(struct ib_pd *pd)
{
        if (atomic_read(&pd->usecnt))
                return -EBUSY;

        return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

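/*
 * ib_create_ah - Create an address handle for the address vector in
 * @ah_attr.  Takes a PD reference, dropped by ib_destroy_ah().
 */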
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr);

        if (!IS_ERR(ah)) {
                ah->device = pd->device;
                ah->pd     = pd;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}
EXPORT_SYMBOL(ib_create_ah);

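/*
 * ib_modify_ah - Modify the address vector of an address handle.
 * Optional for a driver; returns -ENOSYS if not implemented.
 */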
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

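/*
 * ib_query_ah - Query the address vector associated with an address
 * handle.  Optional; returns -ENOSYS if the driver does not provide it.
 */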
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

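/*
 * ib_destroy_ah - Destroy an address handle and drop its PD reference.
 * The PD pointer is saved before the driver call, since the driver
 * frees the AH on success.
 */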
int ib_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd  = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Queue pairs */

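/*
 * ib_create_qp - Create a queue pair on the given protection domain.
 *
 * On success the QP holds references on the PD, on both CQs, and on
 * the SRQ if one was supplied; all are dropped by ib_destroy_qp().
 * A rough usage sketch (attribute setup elided):
 *
 *	struct ib_qp_init_attr init_attr = { ... };
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */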
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *qp;

        qp = pd->device->create_qp(pd, qp_init_attr);

        if (!IS_ERR(qp)) {
                qp->device        = pd->device;
                qp->pd            = pd;
                qp->send_cq       = qp_init_attr->send_cq;
                qp->recv_cq       = qp_init_attr->recv_cq;
                qp->srq           = qp_init_attr->srq;
                qp->event_handler = qp_init_attr->event_handler;
                qp->qp_context    = qp_init_attr->qp_context;
                qp->qp_type       = qp_init_attr->qp_type;
                atomic_inc(&pd->usecnt);
                atomic_inc(&qp_init_attr->send_cq->usecnt);
                atomic_inc(&qp_init_attr->recv_cq->usecnt);
                if (qp_init_attr->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);

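/*
 * ib_modify_qp - Modify the attributes of a QP, typically to step it
 * through its state machine (RESET -> INIT -> RTR -> RTS).
 * @qp_attr_mask selects which fields of @qp_attr to apply.
 */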
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);

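/*
 * ib_query_qp - Return the current attributes of a QP.  Optional;
 * returns -ENOSYS if the driver does not implement it.
 */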
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

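/*
 * ib_destroy_qp - Destroy a QP.  The PD, CQ, and SRQ pointers are
 * cached before the driver call because the QP memory is freed on
 * success; their reference counts are then dropped.
 */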
int ib_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        int ret;

        pd  = qp->pd;
        scq = qp->send_cq;
        rcq = qp->recv_cq;
        srq = qp->srq;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                atomic_dec(&scq->usecnt);
                atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

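/*
 * ib_create_cq - Create a completion queue with at least @cqe entries.
 *
 * @comp_handler runs when completion events are generated for the CQ,
 * @event_handler for asynchronous events.  Note that only @cqe is
 * passed to the driver; the handlers and @cq_context are attached here
 * in the core layer.
 */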
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe)
{
        struct ib_cq *cq;

        cq = device->create_cq(device, cqe);

        if (!IS_ERR(cq)) {
                cq->device        = device;
                cq->comp_handler  = comp_handler;
                cq->event_handler = event_handler;
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
        }

        return cq;
}
EXPORT_SYMBOL(ib_create_cq);

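/*
 * ib_destroy_cq - Destroy a CQ.  Fails with -EBUSY while any QP still
 * references it.
 */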
int ib_destroy_cq(struct ib_cq *cq)
{
        if (atomic_read(&cq->usecnt))
                return -EBUSY;

        return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

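/*
 * ib_resize_cq - Change the capacity of a CQ.  Optional.  The driver
 * may round @cqe up, so the value it writes back through the pointer
 * is what gets recorded in cq->cqe.
 */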
int ib_resize_cq(struct ib_cq *cq,
                 int cqe)
{
        int ret;

        if (!cq->device->resize_cq)
                return -ENOSYS;

        ret = cq->device->resize_cq(cq, &cqe);
        if (!ret)
                cq->cqe = cqe;

        return ret;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

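/*
 * ib_get_dma_mr - Return a memory region usable for DMA to and from
 * system memory, with the rights given by @mr_access_flags.  Takes a
 * PD reference, dropped by ib_dereg_mr().
 */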
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *mr;

        mr = pd->device->get_dma_mr(pd, mr_access_flags);

        if (!IS_ERR(mr)) {
                mr->device = pd->device;
                mr->pd     = pd;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

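/*
 * ib_reg_phys_mr - Register a set of physical buffers as one memory
 * region starting at the I/O virtual address *@iova_start.
 */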
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start)
{
        struct ib_mr *mr;

        mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
                                     mr_access_flags, iova_start);

        if (!IS_ERR(mr)) {
                mr->device = pd->device;
                mr->pd     = pd;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

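/*
 * ib_rereg_phys_mr - Modify an existing MR's translation, access
 * flags, and/or PD, as selected by @mr_rereg_mask.  Optional; fails
 * with -EBUSY while the MR is in use (usecnt non-zero, e.g. presumably
 * while bound to a memory window).  When IB_MR_REREG_PD is set, the
 * old and new PDs' reference counts are adjusted accordingly.
 */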
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start)
{
        struct ib_pd *old_pd;
        int ret;

        if (!mr->device->rereg_phys_mr)
                return -ENOSYS;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        old_pd = mr->pd;

        ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
                                        phys_buf_array, num_phys_buf,
                                        mr_access_flags, iova_start);

        if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
                atomic_dec(&old_pd->usecnt);
                atomic_inc(&pd->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

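/*
 * ib_query_mr - Retrieve the attributes of a memory region.  Optional;
 * returns -ENOSYS if unimplemented.
 */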
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        return mr->device->query_mr ?
                mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

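/*
 * ib_dereg_mr - Deregister a memory region and drop its PD reference.
 * Fails with -EBUSY while the MR is still in use.
 */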
int ib_dereg_mr(struct ib_mr *mr)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        pd  = mr->pd;
        ret = mr->device->dereg_mr(mr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */

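/*
 * ib_alloc_mw - Allocate a memory window, which can later be bound to
 * a memory region to grant remote access.  Optional for a driver.
 */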
struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *mw;

        if (!pd->device->alloc_mw)
                return ERR_PTR(-ENOSYS);

        mw = pd->device->alloc_mw(pd);
        if (!IS_ERR(mw)) {
                mw->device = pd->device;
                mw->pd     = pd;
                atomic_inc(&pd->usecnt);
        }

        return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

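/*
 * ib_dealloc_mw - Deallocate a memory window and drop its PD reference.
 */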
int ib_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd;
        int ret;

        pd  = mw->pd;
        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

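/*
 * ib_alloc_fmr - Allocate an unmapped fast memory region, intended to
 * be mapped and unmapped more cheaply than a full register/deregister
 * cycle.  Optional for a driver.
 */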
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *fmr;

        if (!pd->device->alloc_fmr)
                return ERR_PTR(-ENOSYS);

        fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
        if (!IS_ERR(fmr)) {
                fmr->device = pd->device;
                fmr->pd     = pd;
                atomic_inc(&pd->usecnt);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

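/*
 * ib_unmap_fmr - Unmap a list of fast memory regions in one call.  All
 * FMRs on @fmr_list are assumed to belong to the same device, so the
 * first entry's device method handles the whole list.
 */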
int ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;

        if (list_empty(fmr_list))
                return 0;

        fmr = list_entry(fmr_list->next, struct ib_fmr, list);
        return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

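/*
 * ib_dealloc_fmr - Deallocate an FMR and drop its PD reference.
 */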
int ib_dealloc_fmr(struct ib_fmr *fmr)
{
        struct ib_pd *pd;
        int ret;

        pd  = fmr->pd;
        ret = fmr->device->dealloc_fmr(fmr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

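/*
 * ib_attach_mcast - Attach a QP to the multicast group identified by
 * @gid and @lid.  Optional; returns -ENOSYS if unimplemented.
 */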
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        return qp->device->attach_mcast ?
                qp->device->attach_mcast(qp, gid, lid) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_attach_mcast);

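/*
 * ib_detach_mcast - Detach a QP from a multicast group.  Optional;
 * returns -ENOSYS if unimplemented.
 */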
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        return qp->device->detach_mcast ?
                qp->device->detach_mcast(qp, gid, lid) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_detach_mcast);