  1. /*
  2. * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
  3. * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
  4. * Copyright (c) 2004 Intel Corporation. All rights reserved.
  5. * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  6. * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
  7. * Copyright (c) 2005 Cisco Systems. All rights reserved.
  8. *
  9. * This software is available to you under a choice of one of two
  10. * licenses. You may choose to be licensed under the terms of the GNU
  11. * General Public License (GPL) Version 2, available from the file
  12. * COPYING in the main directory of this source tree, or the
  13. * OpenIB.org BSD license below:
  14. *
  15. * Redistribution and use in source and binary forms, with or
  16. * without modification, are permitted provided that the following
  17. * conditions are met:
  18. *
  19. * - Redistributions of source code must retain the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer.
  22. *
  23. * - Redistributions in binary form must reproduce the above
  24. * copyright notice, this list of conditions and the following
  25. * disclaimer in the documentation and/or other materials
  26. * provided with the distribution.
  27. *
  28. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  29. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  30. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  31. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  32. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  33. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  34. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  35. * SOFTWARE.
  36. *
  37. * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
  38. */
  39. #include <linux/errno.h>
  40. #include <linux/err.h>
  41. #include <ib_verbs.h>
  42. #include <ib_cache.h>
  43. /* Protection domains */
  44. struct ib_pd *ib_alloc_pd(struct ib_device *device)
  45. {
  46. struct ib_pd *pd;
  47. pd = device->alloc_pd(device, NULL, NULL);
  48. if (!IS_ERR(pd)) {
  49. pd->device = device;
  50. pd->uobject = NULL;
  51. atomic_set(&pd->usecnt, 0);
  52. }
  53. return pd;
  54. }
  55. EXPORT_SYMBOL(ib_alloc_pd);
  56. int ib_dealloc_pd(struct ib_pd *pd)
  57. {
  58. if (atomic_read(&pd->usecnt))
  59. return -EBUSY;
  60. return pd->device->dealloc_pd(pd);
  61. }
  62. EXPORT_SYMBOL(ib_dealloc_pd);
  63. /* Address handles */
  64. struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
  65. {
  66. struct ib_ah *ah;
  67. ah = pd->device->create_ah(pd, ah_attr);
  68. if (!IS_ERR(ah)) {
  69. ah->device = pd->device;
  70. ah->pd = pd;
  71. ah->uobject = NULL;
  72. atomic_inc(&pd->usecnt);
  73. }
  74. return ah;
  75. }
  76. EXPORT_SYMBOL(ib_create_ah);
  77. struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
  78. struct ib_grh *grh, u8 port_num)
  79. {
  80. struct ib_ah_attr ah_attr;
  81. u32 flow_class;
  82. u16 gid_index;
  83. int ret;
  84. memset(&ah_attr, 0, sizeof ah_attr);
  85. ah_attr.dlid = wc->slid;
  86. ah_attr.sl = wc->sl;
  87. ah_attr.src_path_bits = wc->dlid_path_bits;
  88. ah_attr.port_num = port_num;
  89. if (wc->wc_flags & IB_WC_GRH) {
  90. ah_attr.ah_flags = IB_AH_GRH;
  91. ah_attr.grh.dgid = grh->dgid;
  92. ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num,
  93. &gid_index);
  94. if (ret)
  95. return ERR_PTR(ret);
  96. ah_attr.grh.sgid_index = (u8) gid_index;
  97. flow_class = be32_to_cpu(grh->version_tclass_flow);
  98. ah_attr.grh.flow_label = flow_class & 0xFFFFF;
  99. ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
  100. ah_attr.grh.hop_limit = grh->hop_limit;
  101. }
  102. return ib_create_ah(pd, &ah_attr);
  103. }
  104. EXPORT_SYMBOL(ib_create_ah_from_wc);
  105. int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
  106. {
  107. return ah->device->modify_ah ?
  108. ah->device->modify_ah(ah, ah_attr) :
  109. -ENOSYS;
  110. }
  111. EXPORT_SYMBOL(ib_modify_ah);
  112. int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
  113. {
  114. return ah->device->query_ah ?
  115. ah->device->query_ah(ah, ah_attr) :
  116. -ENOSYS;
  117. }
  118. EXPORT_SYMBOL(ib_query_ah);
  119. int ib_destroy_ah(struct ib_ah *ah)
  120. {
  121. struct ib_pd *pd;
  122. int ret;
  123. pd = ah->pd;
  124. ret = ah->device->destroy_ah(ah);
  125. if (!ret)
  126. atomic_dec(&pd->usecnt);
  127. return ret;
  128. }
  129. EXPORT_SYMBOL(ib_destroy_ah);
  130. /* Queue pairs */
  131. struct ib_qp *ib_create_qp(struct ib_pd *pd,
  132. struct ib_qp_init_attr *qp_init_attr)
  133. {
  134. struct ib_qp *qp;
  135. qp = pd->device->create_qp(pd, qp_init_attr, NULL);
  136. if (!IS_ERR(qp)) {
  137. qp->device = pd->device;
  138. qp->pd = pd;
  139. qp->send_cq = qp_init_attr->send_cq;
  140. qp->recv_cq = qp_init_attr->recv_cq;
  141. qp->srq = qp_init_attr->srq;
  142. qp->uobject = NULL;
  143. qp->event_handler = qp_init_attr->event_handler;
  144. qp->qp_context = qp_init_attr->qp_context;
  145. qp->qp_type = qp_init_attr->qp_type;
  146. atomic_inc(&pd->usecnt);
  147. atomic_inc(&qp_init_attr->send_cq->usecnt);
  148. atomic_inc(&qp_init_attr->recv_cq->usecnt);
  149. if (qp_init_attr->srq)
  150. atomic_inc(&qp_init_attr->srq->usecnt);
  151. }
  152. return qp;
  153. }
  154. EXPORT_SYMBOL(ib_create_qp);
  155. int ib_modify_qp(struct ib_qp *qp,
  156. struct ib_qp_attr *qp_attr,
  157. int qp_attr_mask)
  158. {
  159. return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
  160. }
  161. EXPORT_SYMBOL(ib_modify_qp);
  162. int ib_query_qp(struct ib_qp *qp,
  163. struct ib_qp_attr *qp_attr,
  164. int qp_attr_mask,
  165. struct ib_qp_init_attr *qp_init_attr)
  166. {
  167. return qp->device->query_qp ?
  168. qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
  169. -ENOSYS;
  170. }
  171. EXPORT_SYMBOL(ib_query_qp);
  172. int ib_destroy_qp(struct ib_qp *qp)
  173. {
  174. struct ib_pd *pd;
  175. struct ib_cq *scq, *rcq;
  176. struct ib_srq *srq;
  177. int ret;
  178. pd = qp->pd;
  179. scq = qp->send_cq;
  180. rcq = qp->recv_cq;
  181. srq = qp->srq;
  182. ret = qp->device->destroy_qp(qp);
  183. if (!ret) {
  184. atomic_dec(&pd->usecnt);
  185. atomic_dec(&scq->usecnt);
  186. atomic_dec(&rcq->usecnt);
  187. if (srq)
  188. atomic_dec(&srq->usecnt);
  189. }
  190. return ret;
  191. }
  192. EXPORT_SYMBOL(ib_destroy_qp);
  193. /* Completion queues */
  194. struct ib_cq *ib_create_cq(struct ib_device *device,
  195. ib_comp_handler comp_handler,
  196. void (*event_handler)(struct ib_event *, void *),
  197. void *cq_context, int cqe)
  198. {
  199. struct ib_cq *cq;
  200. cq = device->create_cq(device, cqe, NULL, NULL);
  201. if (!IS_ERR(cq)) {
  202. cq->device = device;
  203. cq->uobject = NULL;
  204. cq->comp_handler = comp_handler;
  205. cq->event_handler = event_handler;
  206. cq->cq_context = cq_context;
  207. atomic_set(&cq->usecnt, 0);
  208. }
  209. return cq;
  210. }
  211. EXPORT_SYMBOL(ib_create_cq);
  212. int ib_destroy_cq(struct ib_cq *cq)
  213. {
  214. if (atomic_read(&cq->usecnt))
  215. return -EBUSY;
  216. return cq->device->destroy_cq(cq);
  217. }
  218. EXPORT_SYMBOL(ib_destroy_cq);
  219. int ib_resize_cq(struct ib_cq *cq,
  220. int cqe)
  221. {
  222. int ret;
  223. if (!cq->device->resize_cq)
  224. return -ENOSYS;
  225. ret = cq->device->resize_cq(cq, &cqe);
  226. if (!ret)
  227. cq->cqe = cqe;
  228. return ret;
  229. }
  230. EXPORT_SYMBOL(ib_resize_cq);
  231. /* Memory regions */
  232. struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
  233. {
  234. struct ib_mr *mr;
  235. mr = pd->device->get_dma_mr(pd, mr_access_flags);
  236. if (!IS_ERR(mr)) {
  237. mr->device = pd->device;
  238. mr->pd = pd;
  239. mr->uobject = NULL;
  240. atomic_inc(&pd->usecnt);
  241. atomic_set(&mr->usecnt, 0);
  242. }
  243. return mr;
  244. }
  245. EXPORT_SYMBOL(ib_get_dma_mr);
  246. struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
  247. struct ib_phys_buf *phys_buf_array,
  248. int num_phys_buf,
  249. int mr_access_flags,
  250. u64 *iova_start)
  251. {
  252. struct ib_mr *mr;
  253. mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
  254. mr_access_flags, iova_start);
  255. if (!IS_ERR(mr)) {
  256. mr->device = pd->device;
  257. mr->pd = pd;
  258. mr->uobject = NULL;
  259. atomic_inc(&pd->usecnt);
  260. atomic_set(&mr->usecnt, 0);
  261. }
  262. return mr;
  263. }
  264. EXPORT_SYMBOL(ib_reg_phys_mr);
  265. int ib_rereg_phys_mr(struct ib_mr *mr,
  266. int mr_rereg_mask,
  267. struct ib_pd *pd,
  268. struct ib_phys_buf *phys_buf_array,
  269. int num_phys_buf,
  270. int mr_access_flags,
  271. u64 *iova_start)
  272. {
  273. struct ib_pd *old_pd;
  274. int ret;
  275. if (!mr->device->rereg_phys_mr)
  276. return -ENOSYS;
  277. if (atomic_read(&mr->usecnt))
  278. return -EBUSY;
  279. old_pd = mr->pd;
  280. ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
  281. phys_buf_array, num_phys_buf,
  282. mr_access_flags, iova_start);
  283. if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
  284. atomic_dec(&old_pd->usecnt);
  285. atomic_inc(&pd->usecnt);
  286. }
  287. return ret;
  288. }
  289. EXPORT_SYMBOL(ib_rereg_phys_mr);
  290. int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
  291. {
  292. return mr->device->query_mr ?
  293. mr->device->query_mr(mr, mr_attr) : -ENOSYS;
  294. }
  295. EXPORT_SYMBOL(ib_query_mr);
  296. int ib_dereg_mr(struct ib_mr *mr)
  297. {
  298. struct ib_pd *pd;
  299. int ret;
  300. if (atomic_read(&mr->usecnt))
  301. return -EBUSY;
  302. pd = mr->pd;
  303. ret = mr->device->dereg_mr(mr);
  304. if (!ret)
  305. atomic_dec(&pd->usecnt);
  306. return ret;
  307. }
  308. EXPORT_SYMBOL(ib_dereg_mr);
  309. /* Memory windows */
  310. struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
  311. {
  312. struct ib_mw *mw;
  313. if (!pd->device->alloc_mw)
  314. return ERR_PTR(-ENOSYS);
  315. mw = pd->device->alloc_mw(pd);
  316. if (!IS_ERR(mw)) {
  317. mw->device = pd->device;
  318. mw->pd = pd;
  319. mw->uobject = NULL;
  320. atomic_inc(&pd->usecnt);
  321. }
  322. return mw;
  323. }
  324. EXPORT_SYMBOL(ib_alloc_mw);
  325. int ib_dealloc_mw(struct ib_mw *mw)
  326. {
  327. struct ib_pd *pd;
  328. int ret;
  329. pd = mw->pd;
  330. ret = mw->device->dealloc_mw(mw);
  331. if (!ret)
  332. atomic_dec(&pd->usecnt);
  333. return ret;
  334. }
  335. EXPORT_SYMBOL(ib_dealloc_mw);
  336. /* "Fast" memory regions */
  337. struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
  338. int mr_access_flags,
  339. struct ib_fmr_attr *fmr_attr)
  340. {
  341. struct ib_fmr *fmr;
  342. if (!pd->device->alloc_fmr)
  343. return ERR_PTR(-ENOSYS);
  344. fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
  345. if (!IS_ERR(fmr)) {
  346. fmr->device = pd->device;
  347. fmr->pd = pd;
  348. atomic_inc(&pd->usecnt);
  349. }
  350. return fmr;
  351. }
  352. EXPORT_SYMBOL(ib_alloc_fmr);
  353. int ib_unmap_fmr(struct list_head *fmr_list)
  354. {
  355. struct ib_fmr *fmr;
  356. if (list_empty(fmr_list))
  357. return 0;
  358. fmr = list_entry(fmr_list->next, struct ib_fmr, list);
  359. return fmr->device->unmap_fmr(fmr_list);
  360. }
  361. EXPORT_SYMBOL(ib_unmap_fmr);
  362. int ib_dealloc_fmr(struct ib_fmr *fmr)
  363. {
  364. struct ib_pd *pd;
  365. int ret;
  366. pd = fmr->pd;
  367. ret = fmr->device->dealloc_fmr(fmr);
  368. if (!ret)
  369. atomic_dec(&pd->usecnt);
  370. return ret;
  371. }
  372. EXPORT_SYMBOL(ib_dealloc_fmr);
  373. /* Multicast groups */
  374. int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
  375. {
  376. return qp->device->attach_mcast ?
  377. qp->device->attach_mcast(qp, gid, lid) :
  378. -ENOSYS;
  379. }
  380. EXPORT_SYMBOL(ib_attach_mcast);
  381. int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
  382. {
  383. return qp->device->detach_mcast ?
  384. qp->device->detach_mcast(qp, gid, lid) :
  385. -ENOSYS;
  386. }
  387. EXPORT_SYMBOL(ib_detach_mcast);