/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/err.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Protection domains */
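
/**
 * ib_alloc_pd - Allocate an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs,
 * shared receive queues, address handles, memory regions and memory
 * windows.  The PD's reference count starts at zero; the objects
 * above take a reference as they are created against it.
 */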
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = device->alloc_pd(device, NULL, NULL);

        if (!IS_ERR(pd)) {
                pd->device  = device;
                pd->uobject = NULL;
                atomic_set(&pd->usecnt, 0);
        }

        return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
        if (atomic_read(&pd->usecnt))
                return -EBUSY;

        return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */
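
/**
 * ib_create_ah - Create an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */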
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr);

        if (!IS_ERR(ah)) {
                ah->device  = pd->device;
                ah->pd      = pd;
                ah->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}
EXPORT_SYMBOL(ib_create_ah);
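
/**
 * ib_create_ah_from_wc - Create an address handle for replying to the
 *   sender of a work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: A work completion retrieved from the completion queue of a UD QP.
 * @grh: References the received GRH buffer; only examined when
 *   IB_WC_GRH is set in @wc->wc_flags.
 * @port_num: The outbound port number to associate with the address.
 */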
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num)
{
        struct ib_ah_attr ah_attr;
        u32 flow_class;
        u16 gid_index;
        int ret;

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid = wc->slid;
        ah_attr.sl = wc->sl;
        ah_attr.src_path_bits = wc->dlid_path_bits;
        ah_attr.port_num = port_num;

        if (wc->wc_flags & IB_WC_GRH) {
                ah_attr.ah_flags = IB_AH_GRH;
                ah_attr.grh.dgid = grh->dgid;

                ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num,
                                         &gid_index);
                if (ret)
                        return ERR_PTR(ret);

                ah_attr.grh.sgid_index = (u8) gid_index;
                flow_class = be32_to_cpu(grh->version_tclass_flow);
                ah_attr.grh.flow_label = flow_class & 0xFFFFF;
                ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
                ah_attr.grh.hop_limit = grh->hop_limit;
        }

        return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
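
/*
 * Usage sketch (not part of this file): replying on a UD QP to the
 * sender of a received datagram.  "recv_grh_buf" stands for the 40-byte
 * GRH scratch area at the start of the posted receive buffer and is a
 * hypothetical name; "pd", "wc" and "port" come from the caller.
 *
 *      struct ib_ah *ah;
 *
 *      ah = ib_create_ah_from_wc(pd, &wc, recv_grh_buf, port);
 *      if (IS_ERR(ah))
 *              return PTR_ERR(ah);
 *      ...post sends referencing ah, then...
 *      ib_destroy_ah(ah);
 */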

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */
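
/**
 * ib_create_srq - Create a shared receive queue on the given
 *   protection domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the SRQ.
 *
 * Returns ERR_PTR(-ENOSYS) if the device does not support SRQs.  On
 * success the SRQ takes a reference on @pd.
 */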
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr)
{
        struct ib_srq *srq;

        if (!pd->device->create_srq)
                return ERR_PTR(-ENOSYS);

        srq = pd->device->create_srq(pd, srq_init_attr, NULL);

        if (!IS_ERR(srq)) {
                srq->device        = pd->device;
                srq->pd            = pd;
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
                atomic_inc(&pd->usecnt);
                atomic_set(&srq->usecnt, 0);
        }

        return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->query_srq ?
                srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&srq->usecnt))
                return -EBUSY;

        pd = srq->pd;

        ret = srq->device->destroy_srq(srq);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */
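
/**
 * ib_create_qp - Create a queue pair on the given protection domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the QP.
 *
 * On success the QP takes a reference on @pd, on both CQs and, if one
 * was supplied, on the SRQ; ib_destroy_qp() drops them again.
 */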
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *qp;

        qp = pd->device->create_qp(pd, qp_init_attr, NULL);

        if (!IS_ERR(qp)) {
                qp->device        = pd->device;
                qp->pd            = pd;
                qp->send_cq       = qp_init_attr->send_cq;
                qp->recv_cq       = qp_init_attr->recv_cq;
                qp->srq           = qp_init_attr->srq;
                qp->uobject       = NULL;
                qp->event_handler = qp_init_attr->event_handler;
                qp->qp_context    = qp_init_attr->qp_context;
                qp->qp_type       = qp_init_attr->qp_type;
                atomic_inc(&pd->usecnt);
                atomic_inc(&qp_init_attr->send_cq->usecnt);
                atomic_inc(&qp_init_attr->recv_cq->usecnt);
                if (qp_init_attr->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);

int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);
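
/*
 * Usage sketch (not part of this file): moving a freshly created QP
 * from RESET to INIT.  "my_pkey_index" and "port" are hypothetical
 * caller-supplied values.
 *
 *      struct ib_qp_attr attr = {
 *              .qp_state        = IB_QPS_INIT,
 *              .pkey_index      = my_pkey_index,
 *              .port_num        = port,
 *              .qp_access_flags = 0
 *      };
 *
 *      ret = ib_modify_qp(qp, &attr,
 *                         IB_QP_STATE | IB_QP_PKEY_INDEX |
 *                         IB_QP_PORT  | IB_QP_ACCESS_FLAGS);
 */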

int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        int ret;

        pd  = qp->pd;
        scq = qp->send_cq;
        rcq = qp->recv_cq;
        srq = qp->srq;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                atomic_dec(&scq->usecnt);
                atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */
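
/**
 * ib_create_cq - Create a completion queue on the given device.
 * @device: The device on which to create the CQ.
 * @comp_handler: Callback invoked when a completion event occurs on the CQ.
 * @event_handler: Callback invoked when an asynchronous event occurs on
 *   the CQ.
 * @cq_context: Context passed back to the user's callbacks.
 * @cqe: The minimum number of entries the CQ must support; the driver
 *   may round this up, so examine cq->cqe for the actual size.
 */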
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe)
{
        struct ib_cq *cq;

        cq = device->create_cq(device, cqe, NULL, NULL);

        if (!IS_ERR(cq)) {
                cq->device        = device;
                cq->uobject       = NULL;
                cq->comp_handler  = comp_handler;
                cq->event_handler = event_handler;
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
        }

        return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
        if (atomic_read(&cq->usecnt))
                return -EBUSY;

        return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq,
                 int cqe)
{
        int ret;

        if (!cq->device->resize_cq)
                return -ENOSYS;

        ret = cq->device->resize_cq(cq, &cqe);
        if (!ret)
                cq->cqe = cqe;

        return ret;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */
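
/**
 * ib_get_dma_mr - Return a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights (IB_ACCESS_* flags).
 */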
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *mr;

        mr = pd->device->get_dma_mr(pd, mr_access_flags);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start)
{
        struct ib_mr *mr;

        mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
                                     mr_access_flags, iova_start);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start)
{
        struct ib_pd *old_pd;
        int ret;

        if (!mr->device->rereg_phys_mr)
                return -ENOSYS;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        old_pd = mr->pd;

        ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
                                        phys_buf_array, num_phys_buf,
                                        mr_access_flags, iova_start);

        if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
                atomic_dec(&old_pd->usecnt);
                atomic_inc(&pd->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        return mr->device->query_mr ?
                mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        pd = mr->pd;
        ret = mr->device->dereg_mr(mr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */
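
/**
 * ib_alloc_mw - Allocate a memory window on the given protection domain.
 * @pd: The protection domain associated with the memory window.
 *
 * Returns ERR_PTR(-ENOSYS) if the device does not support memory windows.
 */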
struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *mw;

        if (!pd->device->alloc_mw)
                return ERR_PTR(-ENOSYS);

        mw = pd->device->alloc_mw(pd);
        if (!IS_ERR(mw)) {
                mw->device  = pd->device;
                mw->pd      = pd;
                mw->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd;
        int ret;

        pd = mw->pd;
        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */
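
/**
 * ib_alloc_fmr - Allocate an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */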
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *fmr;

        if (!pd->device->alloc_fmr)
                return ERR_PTR(-ENOSYS);

        fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
        if (!IS_ERR(fmr)) {
                fmr->device = pd->device;
                fmr->pd     = pd;
                atomic_inc(&pd->usecnt);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);
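
/**
 * ib_unmap_fmr - Remove the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 *
 * All regions on the list must belong to the same device; the device's
 * unmap operation is taken from the first entry.
 */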
int ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;

        if (list_empty(fmr_list))
                return 0;

        fmr = list_entry(fmr_list->next, struct ib_fmr, list);
        return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
        struct ib_pd *pd;
        int ret;

        pd = fmr->pd;
        ret = fmr->device->dealloc_fmr(fmr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */
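
/**
 * ib_attach_mcast - Attach the specified UD QP to a multicast group.
 * @qp: The QP to attach to the multicast group.
 * @gid: The multicast group GID.
 * @lid: The multicast group LID in host byte order.
 */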
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        return qp->device->attach_mcast ?
                qp->device->attach_mcast(qp, gid, lid) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        return qp->device->detach_mcast ?
                qp->device->detach_mcast(qp, gid, lid) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_detach_mcast);