verbs.c

/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/err.h>

#include <ib_verbs.h>

/* Protection domains */
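
/*
 * Allocate a protection domain through the device driver's alloc_pd
 * method; on success the common fields are filled in and the PD's
 * reference count starts at zero.
 */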
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = device->alloc_pd(device, NULL, NULL);

        if (!IS_ERR(pd)) {
                pd->device  = device;
                pd->uobject = NULL;
                atomic_set(&pd->usecnt, 0);
        }

        return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
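
/*
 * Free a protection domain. Fails with -EBUSY while any AH, QP, MR,
 * MW or FMR created against the PD still holds a reference on it.
 */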
int ib_dealloc_pd(struct ib_pd *pd)
{
        if (atomic_read(&pd->usecnt))
                return -EBUSY;

        return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */
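
/*
 * Create an address handle inside the given PD and take a reference
 * on the PD so it cannot be freed while the AH exists.
 */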
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr);

        if (!IS_ERR(ah)) {
                ah->device  = pd->device;
                ah->pd      = pd;
                ah->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}
EXPORT_SYMBOL(ib_create_ah);
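
/* modify_ah is an optional device method; report -ENOSYS when absent. */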
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);
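
/* query_ah is likewise optional. */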
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);
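
/*
 * Destroy an address handle, dropping its reference on the parent PD
 * only if the driver call succeeds.
 */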
int ib_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Queue pairs */
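
/*
 * Create a queue pair. On success the caller's attributes are copied
 * into the QP, and references are taken on the PD, both CQs and the
 * optional SRQ so none of them can be destroyed under the QP.
 */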
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *qp;

        qp = pd->device->create_qp(pd, qp_init_attr, NULL);

        if (!IS_ERR(qp)) {
                qp->device        = pd->device;
                qp->pd            = pd;
                qp->send_cq       = qp_init_attr->send_cq;
                qp->recv_cq       = qp_init_attr->recv_cq;
                qp->srq           = qp_init_attr->srq;
                qp->uobject       = NULL;
                qp->event_handler = qp_init_attr->event_handler;
                qp->qp_context    = qp_init_attr->qp_context;
                qp->qp_type       = qp_init_attr->qp_type;
                atomic_inc(&pd->usecnt);
                atomic_inc(&qp_init_attr->send_cq->usecnt);
                atomic_inc(&qp_init_attr->recv_cq->usecnt);
                if (qp_init_attr->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);
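
/* modify_qp is called unconditionally, unlike the optional methods above. */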
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);
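
/* query_qp is optional; -ENOSYS when the driver does not provide it. */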
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);
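
/*
 * Destroy a queue pair. The associated objects are saved in locals
 * before the driver call, since the QP itself may be freed by it; the
 * references are dropped only on success.
 */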
int ib_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        int ret;

        pd  = qp->pd;
        scq = qp->send_cq;
        rcq = qp->recv_cq;
        srq = qp->srq;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                atomic_dec(&scq->usecnt);
                atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */
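
/*
 * Create a completion queue with cqe entries requested, attaching the
 * caller's completion and asynchronous event handlers.
 */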
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe)
{
        struct ib_cq *cq;

        cq = device->create_cq(device, cqe, NULL, NULL);

        if (!IS_ERR(cq)) {
                cq->device        = device;
                cq->uobject       = NULL;
                cq->comp_handler  = comp_handler;
                cq->event_handler = event_handler;
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
        }

        return cq;
}
EXPORT_SYMBOL(ib_create_cq);
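
/* A CQ still referenced by a QP (usecnt != 0) cannot be destroyed. */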
int ib_destroy_cq(struct ib_cq *cq)
{
        if (atomic_read(&cq->usecnt))
                return -EBUSY;

        return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);
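
/*
 * Resize a CQ if the driver supports it. The driver may adjust the
 * requested size, so cqe is passed by reference and the value written
 * back is stored on success.
 */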
int ib_resize_cq(struct ib_cq *cq,
                 int cqe)
{
        int ret;

        if (!cq->device->resize_cq)
                return -ENOSYS;

        ret = cq->device->resize_cq(cq, &cqe);
        if (!ret)
                cq->cqe = cqe;

        return ret;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */
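
/*
 * Get an MR usable for DMA to and from system memory; the region is
 * provided directly by the driver's get_dma_mr method.
 */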
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *mr;

        mr = pd->device->get_dma_mr(pd, mr_access_flags);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
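
/*
 * Register a list of physical buffers as an MR within the PD; common
 * field setup mirrors ib_get_dma_mr().
 */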
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start)
{
        struct ib_mr *mr;

        mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
                                     mr_access_flags, iova_start);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);
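
/*
 * Re-register an existing MR, optionally moving it to a new PD. When
 * IB_MR_REREG_PD is set and the driver call succeeds, the reference
 * is moved from the old PD to the new one. An MR that is still in
 * use (usecnt != 0) cannot be re-registered.
 */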
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start)
{
        struct ib_pd *old_pd;
        int ret;

        if (!mr->device->rereg_phys_mr)
                return -ENOSYS;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        old_pd = mr->pd;

        ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
                                        phys_buf_array, num_phys_buf,
                                        mr_access_flags, iova_start);

        if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
                atomic_dec(&old_pd->usecnt);
                atomic_inc(&pd->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);
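
/* query_mr is optional. */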
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        return mr->device->query_mr ?
                mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);
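
/*
 * Deregister an MR; fails with -EBUSY while the MR's usecnt is
 * non-zero (e.g. while a memory window is still bound to it).
 */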
int ib_dereg_mr(struct ib_mr *mr)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        pd = mr->pd;
        ret = mr->device->dereg_mr(mr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */
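
/*
 * Memory windows are optional: drivers without alloc_mw yield
 * ERR_PTR(-ENOSYS).
 */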
struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *mw;

        if (!pd->device->alloc_mw)
                return ERR_PTR(-ENOSYS);

        mw = pd->device->alloc_mw(pd);
        if (!IS_ERR(mw)) {
                mw->device  = pd->device;
                mw->pd      = pd;
                mw->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);
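
/* Free a memory window, releasing its reference on the PD. */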
int ib_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd;
        int ret;

        pd = mw->pd;
        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */
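
/*
 * Allocate a fast memory region. FMR support is optional; note that
 * an FMR, unlike an ordinary MR, carries no uobject to initialize.
 */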
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *fmr;

        if (!pd->device->alloc_fmr)
                return ERR_PTR(-ENOSYS);

        fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
        if (!IS_ERR(fmr)) {
                fmr->device = pd->device;
                fmr->pd     = pd;
                atomic_inc(&pd->usecnt);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);
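
/*
 * Unmap a list of FMRs with a single driver call; the device of the
 * first entry is used for the whole list, so the list is assumed to
 * contain FMRs of one device only.
 */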
int ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;

        if (list_empty(fmr_list))
                return 0;

        fmr = list_entry(fmr_list->next, struct ib_fmr, list);
        return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);
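
/* Free an FMR, dropping its reference on the PD. */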
int ib_dealloc_fmr(struct ib_fmr *fmr)
{
        struct ib_pd *pd;
        int ret;

        pd = fmr->pd;
        ret = fmr->device->dealloc_fmr(fmr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */
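
/*
 * Attach a QP to a multicast group identified by GID and LID;
 * an optional device method.
 */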
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        return qp->device->attach_mcast ?
                qp->device->attach_mcast(qp, gid, lid) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_attach_mcast);
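
/* The inverse of ib_attach_mcast(); likewise optional. */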
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        return qp->device->detach_mcast ?
                qp->device->detach_mcast(qp, gid, lid) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_detach_mcast);