/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Protection domains */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = device->alloc_pd(device, NULL, NULL);

        if (!IS_ERR(pd)) {
                pd->device  = device;
                pd->uobject = NULL;
                atomic_set(&pd->usecnt, 0);
        }

        return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
        if (atomic_read(&pd->usecnt))
                return -EBUSY;

        return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
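
/*
 * Usage sketch (illustrative, not part of the original file): a consumer
 * typically allocates one PD per device and frees it last, after every
 * object created from it is gone.  "device" stands for an ib_device the
 * caller already holds, e.g. from its ib_client ->add callback.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...create AHs/QPs/CQs/MRs against pd...
 *	ib_dealloc_pd(pd);	// fails with -EBUSY while pd->usecnt != 0
 */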

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr);

        if (!IS_ERR(ah)) {
                ah->device  = pd->device;
                ah->pd      = pd;
                ah->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}
EXPORT_SYMBOL(ib_create_ah);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num)
{
        struct ib_ah_attr ah_attr;
        u32 flow_class;
        u16 gid_index;
        int ret;

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid = wc->slid;
        ah_attr.sl = wc->sl;
        ah_attr.src_path_bits = wc->dlid_path_bits;
        ah_attr.port_num = port_num;

        if (wc->wc_flags & IB_WC_GRH) {
                ah_attr.ah_flags = IB_AH_GRH;
                ah_attr.grh.dgid = grh->sgid;

                ret = ib_find_cached_gid(pd->device, &grh->dgid, &port_num,
                                         &gid_index);
                if (ret)
                        return ERR_PTR(ret);

                ah_attr.grh.sgid_index = (u8) gid_index;
                flow_class = be32_to_cpu(grh->version_tclass_flow);
                ah_attr.grh.flow_label = flow_class & 0xFFFFF;
                ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
                ah_attr.grh.hop_limit = grh->hop_limit;
        }

        return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);
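
/*
 * Usage sketch (illustrative, not in the original file): the common way
 * to answer a UD datagram is to build an AH straight from the completion
 * that delivered it.  "wc" is the ib_wc for the receive; "recv_buf" is a
 * hypothetical receive buffer whose first 40 bytes hold the GRH, as UD
 * receive buffers reserve that space.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, (struct ib_grh *) recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...post a send on a UD QP using this ah...
 *	ib_destroy_ah(ah);
 */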

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr)
{
        struct ib_srq *srq;

        if (!pd->device->create_srq)
                return ERR_PTR(-ENOSYS);

        srq = pd->device->create_srq(pd, srq_init_attr, NULL);

        if (!IS_ERR(srq)) {
                srq->device        = pd->device;
                srq->pd            = pd;
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
                atomic_inc(&pd->usecnt);
                atomic_set(&srq->usecnt, 0);
        }

        return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->query_srq ?
                srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&srq->usecnt))
                return -EBUSY;

        pd = srq->pd;
        ret = srq->device->destroy_srq(srq);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
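
/*
 * Usage sketch (illustrative, not in the original file): an SRQ lets
 * several QPs share one receive buffer pool.  Arming the limit event
 * uses the IB_SRQ_LIMIT bit in srq_attr_mask, as passed through
 * ib_modify_srq() above; "my_srq_event" is a hypothetical handler.
 *
 *	struct ib_srq_init_attr init = {
 *		.event_handler = my_srq_event,
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init);
 *	...
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *	ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);  // arm the limit event
 */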

/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *qp;

        qp = pd->device->create_qp(pd, qp_init_attr, NULL);

        if (!IS_ERR(qp)) {
                qp->device        = pd->device;
                qp->pd            = pd;
                qp->send_cq       = qp_init_attr->send_cq;
                qp->recv_cq       = qp_init_attr->recv_cq;
                qp->srq           = qp_init_attr->srq;
                qp->uobject       = NULL;
                qp->event_handler = qp_init_attr->event_handler;
                qp->qp_context    = qp_init_attr->qp_context;
                qp->qp_type       = qp_init_attr->qp_type;
                atomic_inc(&pd->usecnt);
                atomic_inc(&qp_init_attr->send_cq->usecnt);
                atomic_inc(&qp_init_attr->recv_cq->usecnt);
                if (qp_init_attr->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);

int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        int ret;

        pd  = qp->pd;
        scq = qp->send_cq;
        rcq = qp->recv_cq;
        srq = qp->srq;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                atomic_dec(&scq->usecnt);
                atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
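
/*
 * Usage sketch (illustrative, not part of the original file): creating
 * an RC QP whose send and receive work both complete on one CQ.  Note
 * how the reference counting above keeps pd, both CQs and any SRQ
 * pinned until ib_destroy_qp() drops them again.
 *
 *	struct ib_qp_init_attr init = {
 *		.send_cq     = cq,
 *		.recv_cq     = cq,
 *		.cap         = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *				 .max_send_sge = 1,  .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */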

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe)
{
        struct ib_cq *cq;

        cq = device->create_cq(device, cqe, NULL, NULL);

        if (!IS_ERR(cq)) {
                cq->device        = device;
                cq->uobject       = NULL;
                cq->comp_handler  = comp_handler;
                cq->event_handler = event_handler;
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
        }

        return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
        if (atomic_read(&cq->usecnt))
                return -EBUSY;

        return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq,
                 int cqe)
{
        return cq->device->resize_cq ?
                cq->device->resize_cq(cq, cqe) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
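
/*
 * Usage sketch (illustrative, not in the original file): a CQ with a
 * completion handler.  Re-arming via ib_req_notify_cq() (declared in
 * ib_verbs.h) is what makes comp_handler fire again for later
 * completions; "my_comp_handler" and "my_ctx" are hypothetical.
 *
 *	static void my_comp_handler(struct ib_cq *cq, void *ctx)
 *	{
 *		struct ib_wc wc;
 *
 *		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			...handle wc.status / wc.wr_id...
 *	}
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, 256);
 */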

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *mr;

        mr = pd->device->get_dma_mr(pd, mr_access_flags);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start)
{
        struct ib_mr *mr;

        mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
                                     mr_access_flags, iova_start);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start)
{
        struct ib_pd *old_pd;
        int ret;

        if (!mr->device->rereg_phys_mr)
                return -ENOSYS;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        old_pd = mr->pd;

        ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
                                        phys_buf_array, num_phys_buf,
                                        mr_access_flags, iova_start);

        if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
                atomic_dec(&old_pd->usecnt);
                atomic_inc(&pd->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        return mr->device->query_mr ?
                mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        pd = mr->pd;
        ret = mr->device->dereg_mr(mr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);
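
/*
 * Usage sketch (illustrative, not part of the original file): the
 * simplest MR is the DMA MR, which covers the device's whole DMA
 * address space and is what most kernel consumers use for local
 * buffers.
 *
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	...use mr->lkey in the sge entries of posted work requests...
 *	ib_dereg_mr(mr);
 */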

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *mw;

        if (!pd->device->alloc_mw)
                return ERR_PTR(-ENOSYS);

        mw = pd->device->alloc_mw(pd);
        if (!IS_ERR(mw)) {
                mw->device  = pd->device;
                mw->pd      = pd;
                mw->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd;
        int ret;

        pd = mw->pd;
        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);
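
/*
 * Usage sketch (illustrative, not in the original file): memory windows
 * give a lighter-weight way to grant and revoke remote access to part
 * of an existing MR; the actual binding goes through ib_bind_mw() on a
 * QP (declared in ib_verbs.h).  Few drivers of this era implement
 * alloc_mw, hence the -ENOSYS fallback above.
 *
 *	struct ib_mw *mw = ib_alloc_mw(pd);
 *	if (IS_ERR(mw))
 *		return PTR_ERR(mw);	// commonly -ENOSYS
 *	...bind/unbind via ib_bind_mw()...
 *	ib_dealloc_mw(mw);
 */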

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *fmr;

        if (!pd->device->alloc_fmr)
                return ERR_PTR(-ENOSYS);

        fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
        if (!IS_ERR(fmr)) {
                fmr->device = pd->device;
                fmr->pd     = pd;
                atomic_inc(&pd->usecnt);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;

        if (list_empty(fmr_list))
                return 0;

        fmr = list_entry(fmr_list->next, struct ib_fmr, list);
        return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
        struct ib_pd *pd;
        int ret;

        pd = fmr->pd;
        ret = fmr->device->dealloc_fmr(fmr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
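
/*
 * Usage sketch (illustrative, not part of the original file): FMRs are
 * remapped with ib_map_phys_fmr() (an inline in ib_verbs.h) and
 * recycled in batches, which is why ib_unmap_fmr() above takes a whole
 * list.  The page-size field of ib_fmr_attr is omitted here because its
 * name changed across kernel versions.
 *
 *	struct ib_fmr_attr attr = { .max_pages = 64, .max_maps = 32 };
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *	...
 *	ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */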

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        if (!qp->device->attach_mcast)
                return -ENOSYS;
        if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
                return -EINVAL;

        return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        if (!qp->device->detach_mcast)
                return -ENOSYS;
        if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
                return -EINVAL;

        return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);
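
/*
 * Usage sketch (illustrative, not in the original file): only UD QPs may
 * join, and the GID must be a multicast GID (first byte 0xff), matching
 * the checks above.  The MGID/MLID pair normally comes from a multicast
 * group join performed through the subnet administrator.
 *
 *	union ib_gid mgid;	// multicast GID, mgid.raw[0] == 0xff
 *	u16 mlid;		// MLID returned by the SA join
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */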